Fourier transforms and audio¶
Phys481 Week 9b¶
[13]:
import matplotlib.pyplot as plt
# Render matplotlib figures inline in the notebook
%matplotlib inline
import numpy as np
import scipy.io.wavfile
from IPython.display import Audio
# Root directory holding the .wav data files, relative to this notebook
datarootdir = r'../../data/'
[14]:
datafile = 'saucers-dynamics-explorer-gurnett.wav'
# Read sample rate (Hz) and waveform samples from the WAV file
datarate, wavedata = scipy.io.wavfile.read(datarootdir + datafile)
# Plot a spectrogram. Pass Fs=datarate so the frequency axis is in Hz
# instead of normalized cycles/sample (the commented-out variant below
# shows this was the intent).
S, freqs, bins, im = plt.specgram(wavedata, NFFT=1024, noverlap=512, Fs=datarate)
plt.xlabel('Time') ; plt.ylabel('Frequency') ; plt.title(datafile)
# Play back at the file's own sample rate; the previous hard-coded
# rate=20000 would shift the pitch whenever the file's rate differs.
Audio( wavedata.T, rate=datarate )
[14]:
Sinusoid with a noisy background.¶
Add random (“white”) noise to a sinusoid to make a signal that looks messy in the time domain. A Fourier transform will re-arrange the information, placing the harmonic energy in a few bins while the noise will be approximately uniformly distributed.
[15]:
# Build a clean sinusoid and white Gaussian noise, then show each component
# and their sum side by side in the time domain.
nsamples = 4096
signal = np.sin(0.25 * np.arange(nsamples))
noise = np.random.randn(nsamples)

fig, axes = plt.subplots(1, 3, figsize=(15, 4), sharex=True, sharey=True)
panels = [(signal, 'Signal'),
          (noise, 'Noise'),
          (signal + noise, 'Signal\n+\nNoise')]
for ax, (trace, label) in zip(axes, panels):
    ax.plot(trace)
    ax.set_title(label)
axes[0].set_xlabel('Sample #')
# sharex=True propagates this limit to all three panels
axes[2].set_xlim(0, 512)
[15]:
(0.0, 512.0)
Fourier transform¶
[30]:
# Transform each component to the frequency domain and compare the
# magnitude spectra: the sinusoid concentrates into a few bins while the
# noise spreads across all frequencies.
fft_signal = np.fft.fft(signal)
fft_noise = np.fft.fft(noise)
fft_freq = np.fft.fftfreq(len(signal))

fig, (ax_sig, ax_noise, ax_sum) = plt.subplots(1, 3, figsize=(15, 4),
                                               sharex=True, sharey=True)
ax_sig.plot(fft_freq, np.abs(fft_signal))
ax_sig.set_title('Signal')
ax_sig.set_xlabel('frequency')
ax_noise.plot(fft_freq, np.abs(fft_noise))
ax_noise.set_title('Noise')
ax_sum.plot(fft_freq, np.abs(fft_signal + fft_noise))
ax_sum.set_title('Signal\n+\nNoise')
[30]:
Text(0.5, 1.0, 'Signal\n+\nNoise')
Low-pass filter¶
Chop out a lot of the noise, apply the inverse Fourier transform, and get a somewhat nicer sinusoid.
[17]:
# Boolean mask keeping only the bins with |f| <= 0.075 (normalized frequency)
fft_lowpass = np.abs(fft_freq) <= 0.075

fig, (ax_filter, ax_time) = plt.subplots(1, 2, figsize=(15, 4))
# Left panel: combined spectrum with the filter mask overlaid
# (mask scaled by 2200 purely so it is visible on the same axes)
ax_filter.plot(fft_freq, np.abs(fft_signal + fft_noise))
ax_filter.plot(fft_freq, fft_lowpass * 2200, lw=3, alpha=0.5)
ax_filter.set_title('low-pass filter')
ax_filter.set_xlabel('frequency')
# Right panel: inverse-transform the masked spectrum back to the time
# domain, recovering a much cleaner sinusoid
fft_lowpass_filtered = (fft_signal + fft_noise) * fft_lowpass
lowpass_filtered = np.fft.ifft(fft_lowpass_filtered).real
ax_time.plot(lowpass_filtered)
ax_time.set_xlabel('sample #')
ax_time.set_title('low-pass filtered\nSignal+Noise')
ax_time.set_xlim(0, 1024)
[17]:
(0.0, 1024.0)
[18]:
import IPython.display
from ipywidgets import Layout
from IPython.display import Audio

# Play each stage in order: pure tone, noise, their mixture, and the
# low-pass-filtered result.
for clip in (signal, noise, signal + noise, lowpass_filtered):
    IPython.display.display(Audio(clip, rate=4000))
Audio processing¶
[19]:
from IPython.display import Audio

# Constant-frequency tone: phase ramps linearly from 0 to 3000 radians
# over 20000 samples, played back at 20 kHz (one second of audio).
tone_phase = np.linspace(0, 3000, 20000)
Audio(np.sin(tone_phase), rate=20000)
[19]:
[20]:
# Mixture of two sinusoids with different constant frequencies
# (each linspace is a linear phase ramp over the same 20000 samples).
s1 = np.sin(np.linspace(0, 3000, 20000))
s2 = np.sin(np.linspace(1000, 8000, 20000))
Audio(s1 + s2, rate=20000)
[20]:
[21]:
# White noise - WARNING! turn down the volume before playing.
raw_noise = np.random.uniform(-1, 1, 44100)      # 44100 random samples between -1 and 1
peak = np.max(np.abs(raw_noise))
scaled = np.int16(raw_noise / peak * 32767)      # rescale to full 16-bit signed range
Audio( scaled, rate= 20000)
[21]:
The music of the spheres?¶
[25]:
import scipy.io.wavfile
# Magnetospheric "chorus" recording (per the filename/URL: Cluster 2
# spacecraft data, D. Gurnett); source page listed below.
#http://www.astrosurf.com/luxorion/audiofiles-geomagnetosphere.htm
#url = r'http://www.astrosurf.com/luxorion/Radio/chorus-cluster2-gurnett.wav'
datafile = 'chorus-cluster2-gurnett.wav'
# Read sample rate (Hz) and waveform, then play at the native rate
datarate, wavedata = scipy.io.wavfile.read(datarootdir + datafile)
Audio( wavedata, rate=datarate)
[25]:
[26]:
# Plot the raw waveform sample-by-sample to see its envelope.
plt.clf()
plt.plot(wavedata)
plt.xlabel('sample #')
plt.ylabel('7-bit signed amplitude')
plt.title(datafile)
[26]:
Text(0.5, 1.0, 'chorus-cluster2-gurnett.wav')
[42]:
# Show the amplitude spectrum of the whole recording.
freqdata = np.fft.fft(wavedata)
freqscale = np.fft.fftfreq(len(wavedata))
freqdata[0] = 0  # zero the DC bin: there is often a spike at zero frequency
plt.plot(freqscale, np.abs(freqdata))
plt.xlabel('frequency')
plt.ylabel('amplitude')
plt.title(datafile)
print(freqdata.shape, freqscale.shape)
(2742440,) (2742440,)
[43]:
# isolate low frequencies
#
lowpass = np.abs(freqscale) < 0.05
# Inverse transform back to the time domain. (Was np.fft.fft: applying a
# second FORWARD transform to frequency-domain data yields a time-reversed,
# unnormalized signal rather than the filtered waveform.)
lowdata = np.real( np.fft.ifft( lowpass * freqdata ) )
# Remove the DC offset BEFORE scaling, so the peak really lands at 32767;
# the original de-meaned after scaling, which can push samples past ±32767.
lowdata -= np.mean(lowdata)
lowdata /= np.max(np.abs(lowdata)) / 32767
Audio( lowdata, rate= 44100)
[43]:
[44]:
# isolate high frequencies
#
highpass = np.abs(freqscale) > 0.05
# Inverse transform back to the time domain. (Was np.fft.fft: a second
# FORWARD transform of a spectrum gives a time-reversed, unnormalized
# signal, not the filtered audio.)
highdata = np.real( np.fft.ifft( highpass * freqdata ) )
# De-mean before scaling so the peak amplitude really is 32767;
# de-meaning afterwards (as before) can exceed the int16 range.
highdata -= np.mean(highdata)
highdata /= np.max(np.abs(highdata)) / 32767
Audio( highdata, rate= 44100)
[44]:
[45]:
# Zoom into the first ~1000 samples to contrast the slow (low-pass)
# oscillation with the fast (high-pass) one.
plt.plot(lowdata[:999])
plt.plot(highdata[:999])
plt.xlabel('sample #')
plt.title(datafile)
[45]:
Text(0.5, 1.0, 'chorus-cluster2-gurnett.wav')
[46]:
# 50% of low frequencies in one ear, 200% of high frequencies in the other.
#
stereodata = np.array( [0.5*lowdata, 2.0*highdata] )
# Show the (2, nsamples) channel layout. The bare `stereodata.shape`
# expression here was silently discarded (only a cell's last expression is
# displayed), so print it explicitly.
print(stereodata.shape)
Audio( stereodata, rate= 44100)
[46]: